Vsync信号图形绘制-Choreographer源码分析

Android系统从4.1(API 16)开始加入Choreographer这个类来控制同步处理输入(Input)、动画(Animation)、绘制(Draw)三个UI操作。

Choreographer 即编舞者,负责协调app端的图形绘制,这里主要是等待vsync信号。垂直同步信号到来后就要开始准备绘制下一帧的数据。Choreographer主要是在ViewRootImpl中使用的,ViewRootImpl是view树的管理者,负责view树的逻辑处理及事件输入。

所有的绘制流程是从ViewRootImpl.java的scheduleTraversals开始的,这个方法会去请求vsync信号,并在信号到来时去绘制更新ui。

1
2
3
4
5
6
7
8
9
10
void scheduleTraversals() {
if (!mTraversalScheduled) {
mTraversalScheduled = true;
mTraversalBarrier = mHandler.getLooper().getQueue().postSyncBarrier();
//请求vsync信号,当vsync信号到达后开始mTraversalRunnable任务,垂直信号的接受由ChoreoGrpaher负责
mChoreographer.postCallback(
Choreographer.CALLBACK_TRAVERSAL, mTraversalRunnable, null);
……
}
}

这里我们不关心具体的绘制过程,主要是看Choreographer如何通过vsync信号来协调界面的绘制。
这里首先是通过Choreographer对象mChoreographer post了一个回调,告诉Choreographer当vsync信号到达时帮我调用mTraversalRunnable回调。这个回调的定义如下:

1
2
3
4
5
6
7
final class TraversalRunnable implements Runnable {
@Override
public void run() {
doTraversal();//vsync信号到达后执行绘制流程
}
}
final TraversalRunnable mTraversalRunnable = new TraversalRunnable();

doTraversal()内部会调用performTraversals()方法,从而开启view绘制的三大流程。

下面我们看看Choreographer是如何接收vsync信号并通知ViewRootImpl执行刷新回调的。我们就从这个postCallback入手分析。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
public void postCallback(int callbackType, Runnable action, Object token) {
postCallbackDelayed(callbackType, action, token, 0);
}

public void postCallbackDelayed(int callbackType,
Runnable action, Object token, long delayMillis) {
……
postCallbackDelayedInternal(callbackType, action, token, delayMillis);
}

//post一个延时回调
private void postCallbackDelayedInternal(int callbackType,Object action, Object token, long delayMillis) {
synchronized (mLock) {
final long now = SystemClock.uptimeMillis();
final long dueTime = now + delayMillis;
//将请求的回调按照相应的类型添加到回调队列中,这里会根据一个失效时间来构造
mCallbackQueues[callbackType].addCallbackLocked(dueTime, action, token);

if (dueTime <= now) {//延时为0时调用 ,对于scheduleTraversals走这里
scheduleFrameLocked(now);
} else {
Message msg = mHandler.obtainMessage(MSG_DO_SCHEDULE_CALLBACK, action);
msg.arg1 = callbackType;
msg.setAsynchronous(true);
mHandler.sendMessageAtTime(msg, dueTime);
}
}
}

postCallback最终会调用postCallbackDelayedInternal,参数delayMillis为0,所以会调用scheduleFrameLocked进一步进行操作。这里需要注意的是每次请求都会添加到其类型对应的回调队列中, 这里的mCallbackQueues是一个根据类型区分的回调队列,有四种类型,分别是输入回调,动画回调和绘制回调以及Choreographer.CALLBACK_COMMIT。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
private void scheduleFrameLocked(long now) {
if (!mFrameScheduled) {
mFrameScheduled = true;
if (USE_VSYNC) {//使用vsync信号更新
if (DEBUG_FRAMES) {
Log.d(TAG, "Scheduling next frame on vsync.");
}

// If running on the Looper thread, then schedule the vsync immediately,
// otherwise post a message to schedule the vsync from the UI thread
// as soon as possible.
if (isRunningOnLooperThreadLocked()) {
scheduleVsyncLocked();
} else {//通过ui线程发送请求等待一个vsync信号
Message msg = mHandler.obtainMessage(MSG_DO_SCHEDULE_VSYNC);
msg.setAsynchronous(true);
mHandler.sendMessageAtFrontOfQueue(msg);
}
} else {
final long nextFrameTime = Math.max(
mLastFrameTimeNanos / TimeUtils.NANOS_PER_MS + sFrameDelay, now);
if (DEBUG_FRAMES) {
Log.d(TAG, "Scheduling next frame in " + (nextFrameTime - now) + " ms.");
}
Message msg = mHandler.obtainMessage(MSG_DO_FRAME);
msg.setAsynchronous(true);
mHandler.sendMessageAtTime(msg, nextFrameTime);
}
}
}
```

这个USE_VSYNC代表我们系统使用vsync信号进行屏幕信号的同步,这个方法是在我们的ui线程操作的,所以会发送MSG_DO_SCHEDULE_VSYNC请求vsync信号。这里的mHandler是一个FrameHandler

```java
private final class FrameHandler extends Handler {
public FrameHandler(Looper looper) {
super(looper);
}

@Override
public void handleMessage(Message msg) {
switch (msg.what) {
case MSG_DO_FRAME:
doFrame(System.nanoTime(), 0);
break;
case MSG_DO_SCHEDULE_VSYNC:
doScheduleVsync();//请求vsync信号
break;
case MSG_DO_SCHEDULE_CALLBACK:
doScheduleCallback(msg.arg1);
break;
}
}
}

接着调用doScheduleVsync

1
2
3
4
5
6
7
void doScheduleVsync() {
synchronized (mLock) {
if (mFrameScheduled) {
scheduleVsyncLocked();
}
}
}
1
2
3
private void scheduleVsyncLocked() {
mDisplayEventReceiver.scheduleVsync();//请求vsync信号
}

这里的mDisplayEventReceiver是一个FrameDisplayEventReceiver对象,它继承了DisplayEventReceiver,并实现了其方法onVsync,这个方法就是当vsync信号到达时的回调方法。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
//垂直信号到达
@Override
public void onVsync(long timestampNanos, int builtInDisplayId, int frame) {
// Ignore vsync from secondary display.
// This can be problematic because the call to scheduleVsync() is a one-shot.
// We need to ensure that we will still receive the vsync from the primary
// display which is the one we really care about. Ideally we should schedule
// vsync for a particular display.
// At this time Surface Flinger won't send us vsyncs for secondary displays
// but that could change in the future so let's log a message to help us remember
// that we need to fix this.
//并不是sf内置的display
if (builtInDisplayId != SurfaceControl.BUILT_IN_DISPLAY_ID_MAIN) {
Log.d(TAG, "Received vsync from secondary display, but we don't support "
+ "this case yet. Choreographer needs a way to explicitly request "
+ "vsync for a specific display to ensure it doesn't lose track "
+ "of its scheduled vsync.");
scheduleVsync();
return;
}

// Post the vsync event to the Handler.
// The idea is to prevent incoming vsync events from completely starving
// the message queue. If there are no messages in the queue with timestamps
// earlier than the frame time, then the vsync event will be processed immediately.
// Otherwise, messages that predate the vsync event will be handled first.
long now = System.nanoTime();
if (timestampNanos > now) {
Log.w(TAG, "Frame time is " + ((timestampNanos - now) * 0.000001f)
+ " ms in the future! Check that graphics HAL is generating vsync "
+ "timestamps using the correct timebase.");
timestampNanos = now;
}

if (mHavePendingVsync) {
Log.w(TAG, "Already have a pending vsync event. There should only be "
+ "one at a time.");
} else {
mHavePendingVsync = true;
}

mTimestampNanos = timestampNanos;
mFrame = frame;
Message msg = Message.obtain(mHandler, this);
msg.setAsynchronous(true);
mHandler.sendMessageAtTime(msg, timestampNanos / TimeUtils.NANOS_PER_MS);
}

@Override
public void run() {
mHavePendingVsync = false;
doFrame(mTimestampNanos, mFrame);//垂直信号到来触发
}

}

垂直信号vsync到来后会触发doFrame,在这个方法里面会进行我们的回调,即mTraversalRunnable。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
void doFrame(long frameTimeNanos, int frame) {
……
try {
Trace.traceBegin(Trace.TRACE_TAG_VIEW, "Choreographer#doFrame");
AnimationUtils.lockAnimationClock(frameTimeNanos / TimeUtils.NANOS_PER_MS);

mFrameInfo.markInputHandlingStart();
doCallbacks(Choreographer.CALLBACK_INPUT, frameTimeNanos);//回调输入事件的相关回调

mFrameInfo.markAnimationsStart();
doCallbacks(Choreographer.CALLBACK_ANIMATION, frameTimeNanos);//回调动画相关的回调

mFrameInfo.markPerformTraversalsStart();
doCallbacks(Choreographer.CALLBACK_TRAVERSAL, frameTimeNanos);//回调绘制相关的回调

doCallbacks(Choreographer.CALLBACK_COMMIT, frameTimeNanos);
} finally {
AnimationUtils.unlockAnimationClock();
Trace.traceEnd(Trace.TRACE_TAG_VIEW);
}
……
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
void doCallbacks(int callbackType, long frameTimeNanos) {
CallbackRecord callbacks;
synchronized (mLock) {
final long now = System.nanoTime();
callbacks = mCallbackQueues[callbackType].extractDueCallbacksLocked(
now / TimeUtils.NANOS_PER_MS);//获取相关类型的回调
if (callbacks == null) {
return;
}
mCallbacksRunning = true;
……
try {
Trace.traceBegin(Trace.TRACE_TAG_VIEW, CALLBACK_TRACE_TITLES[callbackType]);
for (CallbackRecord c = callbacks; c != null; c = c.next) {
if (DEBUG_FRAMES) {
Log.d(TAG, "RunCallback: type=" + callbackType
+ ", action=" + c.action + ", token=" + c.token
+ ", latencyMillis=" + (SystemClock.uptimeMillis() - c.dueTime));
}
c.run(frameTimeNanos);//调用回调
}
}
……
}

这里会根据类型从队列中取出相应的回调进行调用。这个就是上层对于vsync的处理。接下来我们看看底层的vsync信号是如何传递给Choreographer的。这就需要看看FrameDisplayEventReceiver的父类DisplayEventReceiver,这个DisplayEventReceiver会通过native层进行初始化,native层通过它的成员方法dispatchVsync将vsync信号报告给上层,即调用onVsync。我们先看其构造方法:

1
2
3
4
5
6
7
8
9
10
11
public DisplayEventReceiver(Looper looper, int vsyncSource) {
if (looper == null) {
throw new IllegalArgumentException("looper must not be null");
}

mMessageQueue = looper.getQueue();
mReceiverPtr = nativeInit(new WeakReference<DisplayEventReceiver>(this), mMessageQueue,
vsyncSource);//初始化接收器

mCloseGuard.open("dispose");
}

其构造方法是调用nativeInit进行初始化的,并将当前对象this作为一个接收器传递给底层。我们看看这个方法

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
frameworks/base/core/jni/android_view_DisplayEventReceiver.cpp
//注册显示事件接收器 receiverWeak即DisplayEventReceiver
static jlong nativeInit(JNIEnv* env, jclass clazz, jobject receiverWeak,
jobject messageQueueObj, jint vsyncSource) {
//这个messageQueue是通过ui线程的Looper构造的
sp<MessageQueue> messageQueue = android_os_MessageQueue_getMessageQueue(env, messageQueueObj);
if (messageQueue == NULL) {
jniThrowRuntimeException(env, "MessageQueue is not initialized.");
return 0;
}

sp<NativeDisplayEventReceiver> receiver = new NativeDisplayEventReceiver(env,
receiverWeak, messageQueue, vsyncSource);//创建本地接收器
status_t status = receiver->initialize();//初始化native的接受器
if (status) {
String8 message;
message.appendFormat("Failed to initialize display event receiver. status=%d", status);
jniThrowRuntimeException(env, message.string());
return 0;
}

receiver->incStrong(gDisplayEventReceiverClassInfo.clazz); // retain a reference for the object
return reinterpret_cast<jlong>(receiver.get());
}

在初始化方法中会创建本地的接收器对象,它是用我们传递的java层的接收器对象构造的,构造完成后即进行初始化。这个NativeDisplayEventReceiver的定义如下

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
//native的显示事件接受器
class NativeDisplayEventReceiver : public DisplayEventDispatcher {
public:
NativeDisplayEventReceiver(JNIEnv* env,
jobject receiverWeak, const sp<MessageQueue>& messageQueue, jint vsyncSource);

void dispose();

protected:
virtual ~NativeDisplayEventReceiver();

private:
jobject mReceiverWeakGlobal;//java层的接收器对象
sp<MessageQueue> mMessageQueue;
DisplayEventReceiver mReceiver;

virtual void dispatchVsync(nsecs_t timestamp, int32_t id, uint32_t count);
virtual void dispatchHotplug(nsecs_t timestamp, int32_t id, bool connected);
};

NativeDisplayEventReceiver继承自DisplayEventDispatcher,而DisplayEventDispatcher又实现了LooperCallback的接口handleEvent,这个方法是Looper的事件回调,也就是当Looper接收到消息后会对其进行调用,那么这个DisplayEventDispatcher就是负责进行消息事件的转发的。后面我们看看它是如何将vsync信号转发的。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
//初始化receiver
status_t DisplayEventDispatcher::initialize() {
status_t result = mReceiver.initCheck();
if (result) {
ALOGW("Failed to initialize display event receiver, status=%d", result);
return result;
}

int rc = mLooper->addFd(mReceiver.getFd(), 0, Looper::EVENT_INPUT,
this, NULL);//添加事件侦听,这里的this代表注册的是一个LooperCallback,
//当事件到达后会调用其handleEvent方法,这个可以具体参见Looper的实现
if (rc < 0) {
return UNKNOWN_ERROR;
}
return OK;
}

这里的addFd将其注册到为Looper的事件回调,注意这里第四个参数this,因为DisplayEventDispatcher是继承LooperCallback的。这样当事件到来后会调用handleEvent。

还有,在NativeDisplayEventReceiver的内部持有一个DisplayEventReceiver对象,这个对象比较重要,它是负责和Sf打交道的。

1
2
3
4
5
6
7
8
9
10
11
DisplayEventReceiver::DisplayEventReceiver(ISurfaceComposer::VsyncSource vsyncSource) {
sp<ISurfaceComposer> sf(ComposerService::getComposerService());
if (sf != NULL) {
//通过sf建立连接,这里就应该很熟悉了,这个连接创建好后在第一次引用时会去注册到EventThread中
mEventConnection = sf->createDisplayEventConnection(vsyncSource);
if (mEventConnection != NULL) {
mDataChannel = std::make_unique<gui::BitTube>();
mEventConnection->stealReceiveChannel(mDataChannel.get());
}
}
}
1
2
3
4
5
6
7
8
sp<IDisplayEventConnection> SurfaceFlinger::createDisplayEventConnection(
ISurfaceComposer::VsyncSource vsyncSource) {
if (vsyncSource == eVsyncSourceSurfaceFlinger) {
return mSFEventThread->createEventConnection();
} else {//如果时app端的就走这里
return mEventThread->createEventConnection();
}
}

刚刚说到DisplayEventReceiver是和Sf打交道的, 它和NativeDisplayEventReceiver是同时创建的。在其构造方法中首先通过sf的createDisplayEventConnection创建一个连接对象即Connection对象。mEventThread是一个EventThread,负责管理和分发vsync信号事件,Connection就是由它创建的。

1
2
3
sp<EventThread::Connection> EventThread::createEventConnection() const {
return new Connection(const_cast<EventThread*>(this));//创建一个新的连接,这个连接在第一个被引用时会去进行注册
}

这个connection在第一次被引用时会进行注册,即将其添加到mEventThread的连接队列,在vsync消息到来时调用其postEvent方法对事件进行转发。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
void EventThread::Connection::onFirstRef() {
// NOTE: mEventThread doesn't hold a strong reference on us
mEventThread->registerDisplayEventConnection(this);
}

//注册创建的连接,实际上时将创建的连接添加到监听队列,并通知等待的线程
status_t EventThread::registerDisplayEventConnection(
const sp<EventThread::Connection>& connection) {
Mutex::Autolock _l(mLock);
mDisplayEventConnections.add(connection);//添加到连接队列中
mCondition.broadcast();//唤醒等待的线程
return NO_ERROR;
}

bool EventThread::threadLoop() {
DisplayEventReceiver::Event event;
Vector< sp<EventThread::Connection> > signalConnections;
signalConnections = waitForEvent(&event);

// dispatch events to listeners...
const size_t count = signalConnections.size();
for (size_t i=0 ; i<count ; i++) {
const sp<Connection>& conn(signalConnections[i]);
// now see if we still need to report this event
status_t err = conn->postEvent(event);//post事件
……
}
return true;
}

status_t EventThread::Connection::postEvent(
const DisplayEventReceiver::Event& event) {
ssize_t size = DisplayEventReceiver::sendEvents(&mChannel, &event, 1);
return size < 0 ? status_t(size) : status_t(NO_ERROR);
}

postEvent实际上会调用DisplayEventReceiver的sendEvents方法,把事件写入BitTube数据通道;对端Looper监听到fd可读后会调用DisplayEventDispatcher的handleEvent回调方法,handleEvent再通过getEvents把事件读出来。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
ssize_t DisplayEventReceiver::sendEvents(gui::BitTube* dataChannel,
Event const* events, size_t count)
{
return gui::BitTube::sendObjects(dataChannel, events, count);//发送接受到的事件
}

int DisplayEventDispatcher::handleEvent(int, int events, void*){
……
// Drain all pending events, keep the last vsync.
nsecs_t vsyncTimestamp;
int32_t vsyncDisplayId;
uint32_t vsyncCount;
if (processPendingEvents(&vsyncTimestamp, &vsyncDisplayId, &vsyncCount)) {//丢弃所有的等待事件,只保留上次的vsync信号事件
ALOGV("dispatcher %p ~ Vsync pulse: timestamp=%" PRId64 ", id=%d, count=%d",
this, ns2ms(vsyncTimestamp), vsyncDisplayId, vsyncCount);
mWaitingForVsync = false;
dispatchVsync(vsyncTimestamp, vsyncDisplayId, vsyncCount);//分派vsync信号
}

return 1; // keep the callback
}

bool DisplayEventDispatcher::processPendingEvents(
nsecs_t* outTimestamp, int32_t* outId, uint32_t* outCount) {
bool gotVsync = false;
DisplayEventReceiver::Event buf[EVENT_BUFFER_SIZE];
ssize_t n;
while ((n = mReceiver.getEvents(buf, EVENT_BUFFER_SIZE)) > 0) {//等待事件到来
ALOGV("dispatcher %p ~ Read %d events.", this, int(n));
for (ssize_t i = 0; i < n; i++) {
const DisplayEventReceiver::Event& ev = buf[i];
switch (ev.header.type) {
case DisplayEventReceiver::DISPLAY_EVENT_VSYNC:
// Later vsync events will just overwrite the info from earlier
// ones. That's fine, we only care about the most recent.
gotVsync = true;
*outTimestamp = ev.header.timestamp;//timestamp
*outId = ev.header.id;//id
*outCount = ev.vsync.count;//count
break;
case DisplayEventReceiver::DISPLAY_EVENT_HOTPLUG:
dispatchHotplug(ev.header.timestamp, ev.header.id, ev.hotplug.connected);
break;
}
}
return gotVsync;
}
}

void NativeDisplayEventReceiver::dispatchVsync(nsecs_t timestamp, int32_t id, uint32_t count) {
JNIEnv* env = AndroidRuntime::getJNIEnv();

ScopedLocalRef<jobject> receiverObj(env, jniGetReferent(env, mReceiverWeakGlobal));
if (receiverObj.get()) {
ALOGV("receiver %p ~ Invoking vsync handler.", this);
env->CallVoidMethod(receiverObj.get(),
gDisplayEventReceiverClassInfo.dispatchVsync, timestamp, id, count);
ALOGV("receiver %p ~ Returned from vsync handler.", this);
}

mMessageQueue->raiseAndClearException(env, "dispatchVsync");
}

最终调用NativeDisplayEventReceiver的dispatchVsync回调给java层的dispatchVsync,这样vsync信号就传递给上层应用了。

坚持原创技术分享,您的支持将鼓励我继续创作!